Make all hydro plots

In [1]:
%matplotlib inline
# %pylab ipympl
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.tri as tri
import matplotlib.colors as colors
import matplotlib.patches as patches
import matplotlib as mpl
import scipy.integrate
import sys
import os
import time
import logging
import collections
import pickle

# spherical harmonics tools
import pyshtools.expand 
import pyshtools.spectralanalysis

import nugridpy.ascii_table as table
from nugridpy import nugridse as nuse
import nugridpy.utils as utils
import nugridpy.astronomy as ast

from ppmpy import ppm

from astropy import constants as const
from astropy import units as u

Msun = (const.M_sun.to(u.g)).value
Rsun = (const.R_sun.to(u.cm)).value

cb = utils.linestylecb # colours

# plot things
ifig = 0
ptrack = {}

# figure sizes
stdSize = 3.5
stdRatio = 4./3.
mollSize = 2.5
mollRatio = 3.5 / 2.5
horizSize = 3.3
horizRatio = 6 / 3.5

# turn off matplotlib messages
logging.getLogger("matplotlib").setLevel(logging.ERROR)
In [2]:
# to easily compare between the two models, a master dictionary is used
run_dependent_quantities = ['moms', 'rprof', 'dump0', 'time0', 'dumpMax', 'dumps', 'times', 'initbase', 
                            'simDump0', 'simTime0', 'simDumpMax', 'mbot', 'mtop']
runs = ['n15', 'n16', 'n17']

# to get "run_dependent_quantities", use a named tuple
allruns = collections.namedtuple('allruns', run_dependent_quantities)

# temporary fill values for anything past rprof
tempfill = [40] * (len(run_dependent_quantities)-2)

Although the paper may change since the origin of this notebook, the figures and what they are conveying should not change significantly. The plots are done in sections like that.

collect all of the quantities for each named tuple and create the master dictionary

In [3]:
# N15
moms_dir = '/data/ASDR/PPMstar/LowZRAWD/N15-LowZRAWD-768-10x-burn-moms/myavsbq'
rprof_dir = '/data/ASDR/PPMstar/LowZRAWD/N15-LowZRAWD-768-10x-burn-moms/prfs/'

var_list = ['xc','ux','uy','uz','|ut|','|ur|','|w|','P','rho','fv']
n15 = allruns(ppm.MomsDataSet(moms_dir,400,2,var_list,rprofset=ppm.RprofSet(rprof_dir)),
              ppm.RprofSet(rprof_dir),*tempfill)

# N16
moms_dir = '/data/ASDR/PPMstar/LowZRAWD/N16-LowZRAWD-1536-10x-burn-moms/myavsbq'
rprof_dir = '/data/ASDR/PPMstar/LowZRAWD/N16-LowZRAWD-1536-10x-burn-moms/prfs/'

var_list = ['xc','ux','uy','uz','|ut|','|ur|','|w|','P','rho','fv']
n16 = allruns(ppm.MomsDataSet(moms_dir,400,2,var_list,rprofset=ppm.RprofSet(rprof_dir)),
              ppm.RprofSet(rprof_dir),*tempfill)

# N17
moms_dir = '/data/ASDR/PPMstar/LowZRAWD/N17-LowZRAWD-1152-100x-burn-moms/myavsbq'
rprof_dir = '/data/ASDR/PPMstar/LowZRAWD/N17-LowZRAWD-1152-100x-burn-moms/prfs/'

var_list = ['xc','ux','uy','uz','|ut|','|ur|','|w|','P','rho','fv']
n17 = allruns(ppm.MomsDataSet(moms_dir,400,2,var_list,rprofset=ppm.RprofSet(rprof_dir)),
              ppm.RprofSet(rprof_dir),*tempfill)
3556 rprof files found in '/data/ASDR/PPMstar/LowZRAWD/N15-LowZRAWD-768-10x-burn-moms/prfs/.
Dump numbers range from 0 to 3555.
3556 .aaa files found in '/data/ASDR/PPMstar/LowZRAWD/N15-LowZRAWD-768-10x-burn-moms/myavsbq/.
Dump numbers range from 0 to 3555.
3556 rprof files found in '/data/ASDR/PPMstar/LowZRAWD/N15-LowZRAWD-768-10x-burn-moms/prfs/.
Dump numbers range from 0 to 3555.
1618 rprof files found in '/data/ASDR/PPMstar/LowZRAWD/N16-LowZRAWD-1536-10x-burn-moms/prfs/.
Dump numbers range from 0 to 1617.
1616 .aaa files found in '/data/ASDR/PPMstar/LowZRAWD/N16-LowZRAWD-1536-10x-burn-moms/myavsbq/.
Dump numbers range from 0 to 1615.
1618 rprof files found in '/data/ASDR/PPMstar/LowZRAWD/N16-LowZRAWD-1536-10x-burn-moms/prfs/.
Dump numbers range from 0 to 1617.
1373 rprof files found in '/data/ASDR/PPMstar/LowZRAWD/N17-LowZRAWD-1152-100x-burn-moms/prfs/.
Dump numbers range from 0 to 1372.
1371 .aaa files found in '/data/ASDR/PPMstar/LowZRAWD/N17-LowZRAWD-1152-100x-burn-moms/myavsbq/.
Dump numbers range from 0 to 1370.
1373 rprof files found in '/data/ASDR/PPMstar/LowZRAWD/N17-LowZRAWD-1152-100x-burn-moms/prfs/.
Dump numbers range from 0 to 1372.

Convenience functions used throughout

In [4]:
# convert a mass coordinate to radius
def m2r(masscoord, myrun, dump):
    # stop printing, too much
    devnull = open(os.devnull, 'w')
    with RedirectStdStreams(stdout=devnull, stderr=devnull):
        m = myrun.rprof.compute_m(dump)
        r = myrun.rprof.get('R',fname=dump,resolution='l')
    return ppm.interpolate(m,r,masscoord)

def readSCFile(initbase, fileExt, dump):
    """
    Read in a .shell or .central file at dump
    Returns the data and header dictionaries
    """

    # counters
    headerLines = 0
    dataLines = 0

    # lists to hold results
    data_keys =[]
    header = {}

    # open the file
    openfile = initbase + '-{0:04d}.{1:s}'.format(dump, fileExt)

    with open(openfile) as myfile:

        # get all lines
        lines = [line.rstrip() for line in myfile]

        # run through the lines
        for line in lines:

            # grab the data keys
            if line.startswith('#'):
                data_keys.extend(line.split(' ')[1:])
                headerLines += 1

            # Grab the header data
            elif line.find('=') != -1:

                # split across = sign, remove whitespace
                split = line.split('=')

                # Now I need to know what type the header should be!
                if split[1].strip().isdecimal():
                    hval = int(split[1].strip())
                elif split[1].strip().find('.') != -1:
                    hval = float(split[1].strip())
                else:
                    hval = split[1].strip()

                # put into dictionary
                header[split[0].strip()] = hval
                headerLines += 1

            # we are now reading in data, this is after all header data
            else:

                # if we are at the first line, create arrays and dict
                if dataLines == 0:
                    data = collections.OrderedDict(zip(data_keys,
                                    map(np.zeros, [len(lines) - headerLines] * len(data_keys))))

                # split the line into its numbers
                line = " ".join(line.split())
                split = line.split(' ')

                for i, key in enumerate(data.keys()):
                    data[key][dataLines] = float(split[i])

                # update line number
                dataLines += 1
                
    return data, header

# use remove print to console at certain points then return
class RedirectStdStreams(object):
    def __init__(self, stdout=None, stderr=None):
        self._stdout = stdout or sys.stdout
        self._stderr = stderr or sys.stderr

    def __enter__(self):
        self.old_stdout, self.old_stderr = sys.stdout, sys.stderr
        self.old_stdout.flush(); self.old_stderr.flush()
        sys.stdout, sys.stderr = self._stdout, self._stderr

    def __exit__(self, exc_type, exc_value, traceback):
        self._stdout.flush(); self._stderr.flush()
        sys.stdout = self.old_stdout
        sys.stderr = self.old_stderr

Determining radial and horizontal timescales

In [5]:
def shellNpoints(r_onShell):

    # get npoints to ensure a perfect sampling up to an lmax
    npoints = np.zeros(len(r_onShell), dtype='int32')
    lmax = np.zeros(len(r_onShell), dtype='int32')

    for i, radius in enumerate(r_onShell):

        lmax_r, N, npoints_r = hydro.moms.sphericalHarmonics_lmax(radius)

        if lmax_r > my_lmax:
            lmax[i] = my_lmax
            N = 2 * (my_lmax + 1)
            npoints[i] = N * 2*N
        else:
            lmax[i] = lmax_r
            npoints[i] = npoints_r

    return npoints, lmax

# get quantities on a shell from the moments data
def shellDerive(dump, r_onShell):
    """

    dump: an integer for the dump number to be read in
    r_onShell: the radius grid with which I interpolate quantities to
    """
    # for this dump, get ur and rho on the grid
    ur_grid = hydro.moms.get_spherical_components('ux', 'uy', 'uz', dump)[0]

    # get rprof quantities used later
    r_rprof = hydro.rprof.get('R', fname=dump, resolution='l')
    ut_rprof = hydro.rprof.get('|Ut|', fname=dump, resolution='l')
    rho_rprof = hydro.rprof.get('Rho0', fname=dump, resolution='l') + hydro.rprof.get('Rho1', fname=dump, resolution='l')
    v_rprof = np.sqrt(np.power(hydro.rprof.get('|U|', fname=dump),2.0) - 
                      np.power(hydro.rprof.get('|Ut|', fname=dump), 2.0))
    
    rho_onShell = ppm.interpolate(r_rprof, rho_rprof, r_onShell)
    Avgv_onShell = ppm.interpolate(r_rprof, v_rprof, r_onShell)
    
    # to loop through CENTRAL quantities
    r_central = (r_onShell + np.roll(r_onShell, -1))[0:-1]/2.
    ut_central = ppm.interpolate(r_rprof, ut_rprof, r_central)
    dt_horizontal = np.zeros((len(power_func), len(r_central)))
    gamma = np.zeros((len(power_func), len(r_onShell)))

    # get the npoints at each radii as well as lmax
    npoints, lmax = shellNpoints(r_onShell)

    # loop through every CENTRAL radii to get the derived CENTRAL quantities
    for count, r, lmax_r, ut_r, npoints_r in zip(np.arange(len(r_central)), r_central, lmax,
                                                 ut_central, npoints):
        # GAMMA QUANTITIES

        # first get the properly formatted interpolation of ur, then get powerSpectrum
        ur_formatted = hydro.moms.sphericalHarmonics_format(ur_grid, r, fname=dump, lmax=lmax_r)
        coeffs_ur = pyshtools.expand.SHExpandDH(ur_formatted, sampling=2)

        # drop l=0
        power_ur = pyshtools.spectralanalysis.spectrum(coeffs_ur, unit='per_l')[1:]
        
        # for every power_func we get the appropriate dt_horizontal
        for i, function in enumerate(power_func):
            dt_horizontal[i, count] = function(power_ur, ut_r, r, **power_func_kwargs[i])

    # DERIVE GAMMA MASS FLUX
    # first get the mass transport. This is ALWAYS choosing the "top shell" as it is
    # inherently a "central" quantity, mass is flowing out through the top of the shell,  but I am
    # deriving it on shells. This means the first index is ENTIRELY WRONG (I WOULD NEVER USE IT ANYWAYS)
    dmdt_radial = 2 * np.pi * (r_onShell**2 * rho_onShell * Avgv_onShell)[1:]

    # get the radial mixing timescale, although inherently a "central" quantity I will roll it to
    # get quantity on the shells. Because of above, I will ensure that the first index is
    # ENTIRELY WRONG (I WOULD NEVER USE IT ANYWAYS)
    dt_radial = np.diff(r_onShell) / (Avgv_onShell[1:])

    # Gamma is a central quantity with its factor on the "lower shell" dictating the horizontal
    # mass transfer of that cell. So, the last radii of gamma is always zero!
    gamma[:, 0:-1] = np.abs(dmdt_radial[np.newaxis, :] * (dt_radial[np.newaxis, :] / dt_horizontal))

    # shell quantities
    return gamma, dt_radial, dt_horizontal

############################################################################################
#
# GAMMA FUNCTIONS
#
############################################################################################

def gamma_weightedPower(power_ur, ut_r, r, factor=None):

    # first we smooth the profile
    smooth = 5
    smoothed_power_ur = np.convolve(power_ur, np.ones((smooth,))/smooth, mode='same')

    # find index of max power if factor != 0
    if factor is None:
        max_i = len(power_ur) - 1
        max_l = len(power_ur)

    else:
        max_i = np.argmin(abs(np.max(smoothed_power_ur) * factor - power_ur))
        max_l = max_i + 1

    # collect the weights up to max_i
    weights = power_ur[0:max_i+1] / np.sum(power_ur[0:max_i+1])

    # what is the timescale?
    l = np.arange(1, max_l+1)
    wave_by_2 = np.pi * r / np.sqrt(l * (l + 1))
    dt_per_l = wave_by_2 / ut_r
    dt = np.sum(weights * dt_per_l)
    
    return dt

Run and other Constants

In [6]:
atomicnoH = 1.
atomicnoC12 = 12.
atomicnocld = n15.rprof.get('atomicnocld',fname=0)
atomicnoair = n15.rprof.get('atomicnoair',fname=0)
fkcld = n15.rprof.get('fkcld',fname=0)
fkair = n15.rprof.get('fkair',fname=0)
cldmu = n15.rprof.get('cldmu',fname=0)
airmu = n15.rprof.get('airmu',fname=0)

Everything should be done at a single dump

In [7]:
# 768 and 1536 should be the same time but...
n16_dump0 = 650
n16_time0 = n16.rprof.get('t',n16_dump0)

# i will construct the time arrays, history is wrong!
n17_dumps = np.array(n17.rprof.get_dump_list())
n16_dumps = np.array(n16.rprof.get_dump_list())
n15_dumps = np.array(n15.rprof.get_dump_list())
n17_times = np.zeros(len(n17_dumps))
n16_times = np.zeros(len(n16_dumps))
n15_times = np.zeros(len(n15_dumps))

for i,dump in enumerate(n17_dumps):
    n17_times[i] = n17.rprof.get('t',dump)
    
for i,dump in enumerate(n16_dumps):
    n16_times[i] = n16.rprof.get('t',dump)
    
for i,dump in enumerate(n15_dumps):
    n15_times[i] = n15.rprof.get('t',dump)
    
# now find what the appropriate time and dump 768 is
dumpi = np.argmin(abs(n16_time0 - n15_times))
n15_dump0 = n16_dumps[dumpi]
n15_time0 = n15_times[dumpi]

dumpi = np.argmin(abs(n16_time0 - n17_times))
n17_dump0 = n16_dumps[dumpi]
n17_time0 = n17_times[dumpi]

Seeing that the runs have been done for longer, there are some forced dumpMax

In [8]:
n15_dumpMax = n15_dumps[-1]
n16_dumpMax = n16_dumps[-1]
n17_dumpMax = n17_dumps[-1]

Find the dump0 and time0 for the python simulation runs for post-processing

In [11]:
asdr_dir = '/data/ASDR/PPMstar/3D1D-advection/'
In [12]:
# set the initbase, i.e reading in the run files
base = asdr_dir+'N15_shell_files'
n15_initbase = os.path.join(base, 'N15-standard-mppnp', 'N15-standard-mppnp')
data, header = readSCFile(n15_initbase, 'shell', 100)

# grab the dump quantities
n15_simDump0 = header['runDump']
n15_simTime0 = n15_times[n15_simDump0]
n15_simDumpMax = len([afile for afile in os.listdir(os.path.dirname(n15_initbase)) 
                      if os.path.isfile(os.path.join(os.path.dirname(n15_initbase), afile))]) - 1 + n15_simDump0

# get the mass coordinates of the boundary
n15_mbot = data['m'][0]
n15_mtop = data['m'][-1]

# set the initbase, i.e reading in the run files
base = asdr_dir+'N16_shell_files'
n16_initbase = os.path.join(base, 'N16-standard-mppnp', 'N16-standard-mppnp')
data, header = readSCFile(n16_initbase, 'shell', 100)

# grab the dump quantities
n16_simDump0 = header['runDump']
n16_simTime0 = n16_times[n16_simDump0]
n16_simDumpMax = len([afile for afile in os.listdir(os.path.dirname(n16_initbase)) 
                      if os.path.isfile(os.path.join(os.path.dirname(n16_initbase), afile))]) - 1 + n16_simDump0

# get the mass coordinates of the boundary
n16_mbot = data['m'][0]
n16_mtop = data['m'][-1]

# set the initbase, i.e reading in the run files
base = asdr_dir+'N17_shell_files'
n17_initbase = os.path.join(base, 'N17-standard-mppnp', 'N17-standard-mppnp')
data, header = readSCFile(n17_initbase, 'shell', 100)

# grab the dump quantities
n17_simDump0 = header['runDump']
n17_simTime0 = n17_times[n17_simDump0]
n17_simDumpMax = len([afile for afile in os.listdir(os.path.dirname(n17_initbase)) 
                      if os.path.isfile(os.path.join(os.path.dirname(n17_initbase), afile))]) - 1 + n17_simDump0

# get the mass coordinates of the boundary
n17_mbot = data['m'][0]
n17_mtop = data['m'][-1]
In [13]:
# after calculating everything else for the named tuple, set them
n15 = n15._replace(dump0=n15_dump0, time0=n15_time0, dumpMax=n15_dumpMax, dumps=n15_dumps, times=n15_times, 
                  initbase=n15_initbase, simDump0=n15_simDump0, simTime0=n15_simTime0, 
                  simDumpMax=n15_simDumpMax, mbot=n15_mbot, mtop=n15_mtop)

n16 = n16._replace(dump0=n16_dump0, time0=n16_time0, dumpMax=n16_dumpMax, dumps=n16_dumps, times=n16_times, 
                  initbase=n16_initbase, simDump0=n16_simDump0, simTime0=n16_simTime0, 
                  simDumpMax=n16_simDumpMax, mbot=n16_mbot, mtop=n16_mtop)

n17 = n17._replace(dump0=n17_dump0, time0=n17_time0, dumpMax=n17_dumpMax, dumps=n17_dumps, times=n17_times, 
                  initbase=n17_initbase, simDump0=n17_simDump0, simTime0=n17_simTime0, 
                  simDumpMax=n17_simDumpMax, mbot=n17_mbot, mtop=n17_mtop)

# master dictionary
run = collections.OrderedDict(zip(runs, [n15, n16, n17]))

Radial Profiles

Showcase the radial profiles of the radial, tangential and total velocity. Also show the heating region of PPMStar

In [14]:
# heating region constants
radbase = 7.282
dlayerbot = 2.0

rminbase = radbase + .5 * dlayerbot
rmaxbase = radbase + 1.5 * dlayerbot

# dictionaries for run specific quantities
initbase = collections.OrderedDict((run, []) for run in runs)
ur = collections.OrderedDict((run, []) for run in runs)
ur_rms = collections.OrderedDict((run, []) for run in runs)
ut = collections.OrderedDict((run, []) for run in runs)
ut_gosh = collections.OrderedDict((run, []) for run in runs)
u = collections.OrderedDict((run, []) for run in runs)
r = collections.OrderedDict((run, []) for run in runs)
r_stream = collections.OrderedDict((run, []) for run in runs)
dumpStart = collections.OrderedDict((run, []) for run in runs)
dumpEnd = collections.OrderedDict((run, []) for run in runs)
In [15]:
# loop through the different runs
for runstr, myrun in run.items():
        
    # velocities from rprofs
    u[runstr] = myrun.rprof.get('|U|',fname=myrun.dump0)
    ut[runstr] = myrun.rprof.get('|Ut|',fname=myrun.dump0)
    ur[runstr] = myrun.rprof.get('Ur',fname=myrun.dump0)
    ur_rms[runstr] = np.sqrt(np.power(u[runstr],2.0) - np.power(ut[runstr],2.0))
    r[runstr] = myrun.rprof.get('R',fname=myrun.dump0,resolution='l')
    
    # convert velocities to km/s
    u[runstr] *= 1e3
    ut[runstr] *= 1e3
    ur[runstr] *= 1e3
    ur_rms[runstr] *= 1e3
    
# loop through the dumps of each run and determine average ut for detecting GOSH
avg_offset = 1
avg_zone = 1

for runstr, myrun in run.items():
    
    # dumps to use
    dumpStart[runstr] = myrun.simDump0
    dumpEnd[runstr] = myrun.dumpMax
    
    alldumps = list(range(dumpStart[runstr], dumpEnd[runstr]))
    
    # initialize array to hold averages
    ut_gosh[runstr] = np.zeros(len(alldumps))
    
    # for every dump to analyze
    for dump in alldumps:
            
        # grab ut and r
        ut_dump = myrun.rprof.get('|Ut|',fname=dump)
        ut_dump[np.isnan(ut_dump)] = 0.
        ut_dump *= 1e3
        r_dump = myrun.rprof.get('R',fname=dump)
        
        # determine the upper boundary
        rtop = m2r(myrun.mtop, myrun, dump)
        rtop_avg = rtop - avg_offset
        rbot_avg = rtop_avg - avg_zone
        
        # find indices for the averaging
        avg_indices = ((r_dump > rbot_avg) & (r_dump < rtop_avg))
        ut_gosh[runstr][dump-alldumps[0]] = ut_dump[avg_indices].mean()

Vrprofs

Plot dependent dictionaries

In [16]:
# which ones to use
run_plots = ['n15', 'n16']

# boundaries, etc.
myylim = [0,30]
myxlim = [5,30]
rtop = m2r(run['n16'].mtop, run['n16'], run['n16'].dump0)
rbot = m2r(run['n16'].mbot, run['n16'], run['n16'].dump0)

# plot stuff
savefig = 'vrprofs.pdf'
render = True
save = True
thickline = 1.5
thinline = 1.0

Fig 3

In [17]:
celln = -1

# celln
celln += 1

# configure plots
close_local = ppm.close_plot(celln,ifig,ptrack)
if close_local[0]:
    plt.close(fig); ifig += 1; fig = plt.figure(ifig,figsize=(stdRatio*stdSize,stdSize),dpi=300)
    ppm.add_plot(celln,ifig,ptrack)
else:
    ifig += 1; fig = plt.figure(ifig,figsize=(stdRatio*stdSize,stdSize),dpi=300)
    ppm.add_plot(celln,ifig,ptrack)

# use the same color scheme for plots
cbc = 0
ax = fig.add_subplot(111)

for runstr, myrun in run.items():
    if runstr not in run_plots:
        continue
        
    if runstr == 'n16':
        ax.plot(r[runstr],ur_rms[runstr],ls=cb(cbc)[0],color=cb(cbc)[2],label=r'v$_{r}$',
               linewidth=thickline)
    else:
        ax.plot(r[runstr],ur_rms[runstr],ls=cb(cbc)[0],color=cb(cbc)[2],linewidth=thinline)
cbc += 1

# loop through the different runs
for runstr, myrun in run.items():
    if runstr not in run_plots:
        continue
        
    if runstr == 'n16':
        ax.plot(r[runstr],ut[runstr],ls=cb(cbc)[0],color=cb(cbc)[2],
                label=r'v$_{\perp}$',linewidth=thickline)
    else:
        ax.plot(r[runstr],ut[runstr],ls=cb(cbc)[0],color=cb(cbc)[2],linewidth=thinline)
cbc += 1

# loop through the different runs
for runstr, myrun in run.items():
    if runstr not in run_plots:
        continue
        
    if runstr == 'n16':
        ax.plot(r[runstr],u[runstr],ls=cb(cbc+1)[0],color=cb(cbc)[2],label=r'v',
               linewidth=thickline)
    else:
        ax.plot(r[runstr],u[runstr],ls=cb(cbc+1)[0],color=cb(cbc)[2],linewidth=thinline)
cbc += 1

# add a shaded rectangle
ax.add_patch(patches.Rectangle((rminbase,myylim[0]),rmaxbase - rminbase,myylim[-1] - myylim[0],facecolor='blanchedalmond'))

# add the convective boundary lines
ax.vlines(rtop, *myylim, color='k', linestyle=':')
ax.vlines(rbot, *myylim, color='k', linestyle=':')

# plot details
ax.set_xlabel('r / Mm')
ax.set_ylabel(r'v$_{\mathrm{rms}}$ / km s$^{-1}$')
ax.set_ylim(myylim)
ax.set_xlim(myxlim)
ax.legend(frameon=False, loc='lower center')

# savefig
fig.tight_layout()
if save:
    fig.savefig(savefig,bbox_inches = "tight",dpi=300)
if not render:
    plt.close()

Ut averages

In [18]:
# boundaries, etc.
myylim = [0,200]
myxlim = [300,800]
thinline = 1.0

# plot stuff
savefig = 'gosh_rprofs.pdf'
render = True
save = True

Fig 10

In [19]:
celln = 200

# celln
celln += 1

# configure plots
close_local = ppm.close_plot(celln,ifig,ptrack)
if close_local[0]:
    plt.close(fig); ifig += 1; fig = plt.figure(ifig,figsize=(stdRatio*stdSize,stdSize),dpi=300)
    ppm.add_plot(celln,ifig,ptrack)
else:
    ifig += 1; fig = plt.figure(ifig,figsize=(stdRatio*stdSize,stdSize),dpi=300)
    ppm.add_plot(celln,ifig,ptrack)

# use the same color scheme for plots
cbc = 0
ax = fig.add_subplot(111)

# loop through the different runs
for runstr, myrun in run.items():
    ax.plot(myrun.times[myrun.simDump0:myrun.dumpMax] / 60., ut_gosh[runstr], color=cb(cbc)[2],
            label=runstr.capitalize(),linewidth=thinline)
    cbc += 1

# plot details
ax.set_xlabel('t / min')
ax.set_ylabel(r'$\langle v_{\perp, \mathrm{rms}} \rangle$ / km s$^{-1}$')
ax.vlines(myrun.times[1089]/60., *myylim, color='k', linestyle='-', linewidth=0.8)
ax.set_ylim(myylim)
ax.set_xlim(myxlim)
ax.legend(frameon=False)

# savefig
fig.tight_layout()
if save:
    fig.savefig(savefig,bbox_inches = "tight",dpi=300)
if not render:
    plt.close()

Mollweide Plots (Fig 4, 7 & 8)

Showcase the radial velocity and XH close to the middle of the convection zone and near the upper boundary. This is to show the changing scales of the flows and how isolated/homogenized the streams are at different radii. The images must be rasterized otherwise they are huge files

In [20]:
# other constants
radii_low = 14.5
radii_high = 23
radii_iter = collections.OrderedDict(zip(['low','high'],[radii_low, radii_high]))

# dictionary run and radii quantities
ur_r = collections.OrderedDict((run, 
                                collections.OrderedDict(zip(radii_iter.keys(), 
                                                            [1]*len(radii_iter.keys())))) for run in runs)
XH_r = collections.OrderedDict((run, 
                                collections.OrderedDict(zip(radii_iter.keys(), 
                                                            [1]*len(radii_iter.keys())))) for run in runs)
utheta_r = collections.OrderedDict((run, 
                                collections.OrderedDict(zip(radii_iter.keys(), 
                                                            [1]*len(radii_iter.keys())))) for run in runs)
rhoFluct_r = collections.OrderedDict((run, 
                                collections.OrderedDict(zip(radii_iter.keys(), 
                                                            [1]*len(radii_iter.keys())))) for run in runs)
rhour_r = collections.OrderedDict((run, 
                                collections.OrderedDict(zip(radii_iter.keys(), 
                                                            [1]*len(radii_iter.keys())))) for run in runs)
npoints = collections.OrderedDict((run, 
                                collections.OrderedDict(zip(radii_iter.keys(), 
                                                            [1]*len(radii_iter.keys())))) for run in runs)
phi_r = collections.OrderedDict((run, 
                                collections.OrderedDict(zip(radii_iter.keys(), 
                                                            [1]*len(radii_iter.keys())))) for run in runs)
theta_r = collections.OrderedDict((run, 
                                collections.OrderedDict(zip(radii_iter.keys(), 
                                                            [1]*len(radii_iter.keys())))) for run in runs)
radii_fluxFA = collections.OrderedDict((run, 
                                collections.OrderedDict(zip(radii_iter.keys(), 
                                                            [1]*len(radii_iter.keys())))) for run in runs) 
flux_up = collections.OrderedDict((run, 
                                collections.OrderedDict(zip(radii_iter.keys(), 
                                                            [1]*len(radii_iter.keys())))) for run in runs)
FA_up = collections.OrderedDict((run, 
                                collections.OrderedDict(zip(radii_iter.keys(), 
                                                            [1]*len(radii_iter.keys())))) for run in runs)

# only run dependent quantities
utr_bound = collections.OrderedDict((run, [1]*len(runs)) for run in runs)
XH_avg_U = collections.OrderedDict((run, [1]*len(runs)) for run in runs)
XH_avg_D = collections.OrderedDict((run, [1]*len(runs)) for run in runs)
r_stream = collections.OrderedDict((run, [1]*len(runs)) for run in runs)

npoints_ut_boundary = collections.OrderedDict((run, [1]*len(runs)) for run in runs)
phi_ut_boundary = collections.OrderedDict((run, [1]*len(runs)) for run in runs)
theta_ut_boundary = collections.OrderedDict((run, [1]*len(runs)) for run in runs)

# calculate the number of points to sample properly at select radii
for runstr, myrun in run.items():
    for radstr, radius in radii_iter.items():
        npoints[runstr][radstr] = myrun.moms.sphericalHarmonics_lmax(radius)[2]

Get quantities on a sphere

In [21]:
# loop through the different runs
for runstr, myrun in run.items():
   
    # radii counter
    i = 0
    
    # grab the low and high radial velocities and XH
    for radstr, radius in radii_iter.items():
           
        # get the radial and tangential velocity component
        ur, utheta, uphi = myrun.moms.get_spherical_components('ux','uy','uz',fname=myrun.dump0)     
        tur_r, ttheta_r, tphi_r = myrun.moms.get_spherical_interpolation(ur,radius,fname=myrun.dump0,
                                                                        npoints=npoints[runstr][radstr],
                                                                        plot_mollweide=True)
        
        ur_r[runstr][radstr] = tur_r
        theta_r[runstr][radstr] = ttheta_r
        phi_r[runstr][radstr] = tphi_r
        
        utheta = myrun.moms.get('|ut|',fname=myrun.dump0)
        tutheta_r = myrun.moms.get_spherical_interpolation(utheta,radius,fname=myrun.dump0,
                                                          npoints=npoints[runstr][radstr])
        
        utheta_r[runstr][radstr] = tutheta_r
        
        # get the mass fraction of H
        fv = myrun.moms.get('fv',fname=myrun.dump0)
        
        # this is Xcld
        Xcld = fv/((1. - fv)*(airmu/cldmu) + fv)
        
        # convert to XH
        XH = Xcld * fkcld * atomicnoH / atomicnocld
        
        # get it on the sphere
        XH_r[runstr][radstr] = myrun.moms.get_spherical_interpolation(XH,radius,fname=myrun.dump0,
                                                                  npoints=npoints[runstr][radstr])
        
        # get the density fluctuations on the sphere
        rho = myrun.moms.get('rho',myrun.dump0)
        rho_r = myrun.moms.get_spherical_interpolation(rho,radius,fname=myrun.dump0,
                                                method='moments', npoints=npoints[runstr][radstr])
        rhoFluct_r[runstr][radstr] = (rho_r - rho_r.mean()) / rho_r.mean()
        
        # get the mass flux on the sphere
        rhour = rho * ur
        rhour_r[runstr][radstr] = myrun.moms.get_spherical_interpolation(rhour,radius,fname=myrun.dump0,
                                                                npoints=npoints[runstr][radstr])
        
        # increment radii counter
        i += 1

Work out the convective boundary from utheta condition

In [22]:
# factor
factor = 5.

# loop through the different runs
for runstr, myrun in run.items():
    
    # get the tangential velocity
#     ur, utheta, uphi = myrun.moms.get_spherical_components('ux','uy','uz',fname=myrun.dump0)
    utheta = myrun.moms.get('|ut|',fname=myrun.dump0)
    
    # create a radial axis for the utr_r rays, only near the convective boundary
    rbot_ut = 22
    rtop_ut = 28
    deex = myrun.moms.moms_gridresolution
    radial_axis = np.arange(0, int(factor*(rtop_ut - rbot_ut)/deex)) * (deex/factor) + rbot_ut
    
    # get the gradient on the sphere
    npoints_ut_boundary[runstr] = 100000
    ut_r, theta_ut_boundary[runstr], phi_ut_boundary[runstr] = myrun.moms.get_spherical_interpolation(
        utheta,radial_axis,fname=myrun.dump0,npoints=npoints_ut_boundary[runstr],plot_mollweide=True,method='trilinear')
    
    # smooth parameter
    N = 7
    utr_r = np.zeros(ut_r.shape)
    for point in range(ut_r.shape[1]):
        
        # smooth before derivative
        utr_r[:,point] = np.convolve(utr_r[:,point], np.ones((N,))/N, mode='same')
        utr_r[:,point] = ppm.cdiff(ut_r[:,point]) / ppm.cdiff(radial_axis)
        
    # create the zeros array for the boundary
    utr_bound[runstr] = np.zeros(utr_r.shape[1])
    
    # loop over the npoints, find the minimum
    for point in range(utr_r.shape[1]):
        min_utr = np.argmin(utr_r[:,point])
        utr_bound[runstr][point] = radial_axis[min_utr]
        
    print('Done {:s}'.format(runstr))
Done n15
Done n16
Done n17

What is the percent of the mass flux in the up and down directions at the two radii? What is their fractional areas?

In [23]:
# runs to calculate, this one takes a bit
runs_calc = ['n16']

# loop through the different runs
for runstr, myrun in run.items():
    
    # are we working with every run?
    if runstr not in runs_calc:
        continue
        
    # what radii for this run?
    min_r = m2r(myrun.mbot, myrun, myrun.dump0)
    max_r = m2r(myrun.mtop, myrun, myrun.dump0)
    deex = myrun.rprof.get('deex',0)
    radii_fluxFA[runstr] = np.linspace(min_r, max_r, int((max_r - min_r)/deex))
    
    FA_up[runstr] = np.zeros(len(radii_fluxFA[runstr]))
    flux_up[runstr] = np.zeros(len(radii_fluxFA[runstr]))
    
    # my radial velocity grid
    ur, utheta, uphi = myrun.moms.get_spherical_components('ux','uy','uz',fname=myrun.dump0)
    rho = myrun.moms.get('rho',fname=myrun.dump0)
    
    # loop through the radii
    for counter, radius in enumerate(radii_fluxFA[runstr]):
    
        # how many points to sample
        npoints = myrun.moms.sphericalHarmonics_lmax(radius)[2]
        
        # get the radial velocity on the sphere    
        tur_r = myrun.moms.get_spherical_interpolation(ur,radius,fname=myrun.dump0,
                                                                        npoints=npoints,
                                                                        plot_mollweide=False)
        trho_r = myrun.moms.get_spherical_interpolation(rho,radius,fname=myrun.dump0,
                                                                        npoints=npoints,
                                                                        plot_mollweide=False)
        
        # FA and flux
        positive_v = (tur_r > 0)
        
        FA_up[runstr][counter] = np.count_nonzero(positive_v) / npoints
        flux_up[runstr][counter] = (tur_r * trho_r)[positive_v].sum() / (np.abs(tur_r * trho_r).sum())

What are the up and down stream averages for the H mass fraction?

Ur Mollweide Plots

Plot dependent dictionaries

In [24]:
# if i want to look at one particular run and/or radii
runs_plot = ['n16']
radii_plot = ['low', 'high']
gridlines = 0.1

# plot stuff
savefig = collections.OrderedDict((run, 
                                collections.OrderedDict(zip(radii_iter.keys(), 
                                                            [1]*len(radii_iter.keys())))) for run in runs)

savefig['n15']['low'] = 'N15-vr-mollweide-low.pdf'
savefig['n15']['high'] = 'N15-vr-mollweide-high.pdf'
savefig['n16']['low'] = 'N16-vr-mollweide-low.pdf'
savefig['n16']['high'] = 'N16-vr-mollweide-high.pdf'
savefig['n17']['low'] = 'N17-vr-mollweide-low.pdf'
savefig['n17']['high'] = 'N17-vr-mollweide-high.pdf'
render = True
save = True
In [25]:
celln = 0

# loop through every run
for runstr, myrun in run.items():
    
    # loop through the radii
    for radstr, radius in radii_iter.items():

        # are we working with every run?
        if runstr not in runs_plot:
            continue

        # are we working with every radius?
        if radstr not in radii_plot:
            continue
            
        # celln
        celln += 1

        # configure plots
        close_local = ppm.close_plot(celln,ifig,ptrack)
        if close_local[0]:
            plt.close(fig); ifig += 1; fig = plt.figure(ifig,figsize=(mollSize*mollRatio,mollSize),dpi=300)
            ppm.add_plot(celln,ifig,ptrack)
        else:
            ifig += 1; fig = plt.figure(ifig,figsize=(mollSize*mollRatio,mollSize),dpi=300)
            ppm.add_plot(celln,ifig,ptrack)

        ax = plt.axes([0.01, 0.01, 0.98, 0.88], projection='mollweide')
     
        # convert units to km/s
        ur_plot = ur_r[runstr][radstr] * 1e3
        
        # if it is the higher radius, velocities are smaller and homogenized, use std
        if radstr == 'high':   
            # find the vmin/vmax
            vval = np.abs(ur_plot).mean() + 5*np.abs(ur_plot).std()
            
            ax.scatter(phi_r[runstr][radstr], theta_r[runstr][radstr], s=(72./fig.dpi)**2, 
                       marker=',',c=ur_plot,cmap='RdBu_r',vmin=-vval, vmax=vval,rasterized=True)
        else:
            # find the vmin/vmax
            vval = np.max(np.abs(ur_plot))

            # plot
            ax.scatter(phi_r[runstr][radstr], theta_r[runstr][radstr], s=(72./fig.dpi)**2, 
                         marker=',',c=ur_plot,cmap='RdBu_r',vmin=-vval,vmax=vval,rasterized=True)
                       
        ax.set_xticklabels([])
        ax.set_yticklabels([])
        ax.grid(color='k', linewidth=gridlines)

        # create a colorbar for the axs1 tripcolor, use a new axes for it
        cax = fig.add_axes([0.01, 0.03, 0.98, 0.03])
        cbar1 = fig.colorbar(ax.collections[0], cax=cax, orientation='horizontal',format='%.1f',
                             label=r'v$_{\mathrm{r}}$  / km s$^{-1}$')
        
        # savefig
        if save:
            fig.savefig(savefig[runstr][radstr],bbox_inches = "tight",dpi=300)
        if not render:
            plt.close()
/usr/local/lib/python3.6/dist-packages/matplotlib/projections/geo.py:414: RuntimeWarning: invalid value encountered in arcsin
  theta = np.arcsin(y / np.sqrt(2))
/usr/local/lib/python3.6/dist-packages/matplotlib/projections/geo.py:414: RuntimeWarning: invalid value encountered in arcsin
  theta = np.arcsin(y / np.sqrt(2))

XH Mollweide Plots

Plot dependent dictionaries

In [26]:
# if i want to look at one particular run and/or radii
runs_plot = ['n16']
radii_plot = ['low', 'high']
gridlines = 0.1

# plot stuff
savefig = collections.OrderedDict((run, 
                                collections.OrderedDict(zip(radii_iter.keys(), 
                                                            [1]*len(radii_iter.keys())))) for run in runs)

savefig['n15']['low'] = 'N15-XH-mollweide-low.pdf'
savefig['n15']['high'] = 'N15-XH-mollweide-high.pdf'
savefig['n16']['low'] = 'N16-XH-mollweide-low.pdf'
savefig['n16']['high'] = 'N16-XH-mollweide-high.pdf'
savefig['n17']['low'] = 'N17-XH-mollweide-low.pdf'
savefig['n17']['high'] = 'N17-XH-mollweide-high.pdf'
render = True
save = True
In [27]:
mollRatio = 3.5/2.5
In [28]:
celln = 4

# loop through every run
for runstr, myrun in run.items():

    # are we working with every run?
    if runstr not in runs_plot:
        continue
        
    # loop through the radii
    for radstr, radius in radii_iter.items():    

        # are we working with every run?
        if radstr not in radii_plot:
            continue
        
        # celln
        celln += 1

        # configure plots
        close_local = ppm.close_plot(celln,ifig,ptrack)
        if close_local[0]:
            plt.close(fig); ifig += 1; fig = plt.figure(ifig,figsize=(mollSize*mollRatio,mollSize),dpi=300)
            ppm.add_plot(celln,ifig,ptrack)
        else:
            ifig += 1; fig = plt.figure(ifig,figsize=(mollSize*mollRatio,mollSize),dpi=300)
            ppm.add_plot(celln,ifig,ptrack)

        ax = plt.axes([0.01, 0.01, 0.98, 0.88], projection='mollweide')
     
        # use log plot, the range of values should be the same for both in this case since it is log
        if radstr == 'low':
            XH_log = [1e-8, 1e-4]
        else:
            XH_log = [1e-8, 1e-3]
            
        # remove an XH below 1e-8
        XH_plot = XH_r[runstr][radstr]
        XH_plot[np.where(XH_plot < 1e-8)] = 0.
        
        # plot
        ax.scatter(phi_r[runstr][radstr], theta_r[runstr][radstr], s=(72./fig.dpi)**2, 
                       marker=',',c=np.log10(XH_plot),cmap='Spectral_r',vmin=np.log10(min(XH_log)),
                   vmax=np.log10(max(XH_log)),rasterized=True)
                       
        ax.set_xticklabels([])
        ax.set_yticklabels([])
        ax.grid(color='k', linewidth=gridlines)

        # create a colorbar for the axs1 tripcolor, take part of the axes
        cax = fig.add_axes([0.01, 0.03, 0.98, 0.03])
        cbar1 = fig.colorbar(ax.collections[0], cax=cax, orientation='horizontal',format='%.1f',
                             label=r'$\log_{10}(\mathrm{X}_{\mathrm{H}}$)')

        # savefig
        if save:
            fig.savefig(savefig[runstr][radstr],bbox_inches = "tight",dpi=300)
        if not render:
            plt.close()
/usr/local/lib/python3.6/dist-packages/ipykernel_launcher.py:43: RuntimeWarning: divide by zero encountered in log10
/usr/local/lib/python3.6/dist-packages/matplotlib/projections/geo.py:414: RuntimeWarning: invalid value encountered in arcsin
  theta = np.arcsin(y / np.sqrt(2))
/usr/local/lib/python3.6/dist-packages/ipykernel_launcher.py:43: RuntimeWarning: divide by zero encountered in log10
/usr/local/lib/python3.6/dist-packages/matplotlib/projections/geo.py:414: RuntimeWarning: invalid value encountered in arcsin
  theta = np.arcsin(y / np.sqrt(2))

Rho Mollweide

In [29]:
# if i want to look at one particular run and/or radii
runs_plot = ['n16']
radii_plot = ['low', 'high']
gridlines = 0.1

# plot stuff
savefig = collections.OrderedDict((run, 
                                collections.OrderedDict(zip(radii_iter.keys(), 
                                                            [1]*len(radii_iter.keys())))) for run in runs)

savefig['n15']['low'] = 'N15-rho-mollweide-low.pdf'
savefig['n15']['high'] = 'N15-rho-mollweide-high.pdf'
savefig['n16']['low'] = 'N16-rho-mollweide-low.pdf'
savefig['n16']['high'] = 'N16-rho-mollweide-high.pdf'
savefig['n17']['low'] = 'N17-rho-mollweide-low.pdf'
savefig['n17']['high'] = 'N17-rho-mollweide-high.pdf'
render = True
save = True
In [30]:
celln = 80

# loop through every run
for runstr, myrun in run.items():

    # are we working with every run?
    if runstr not in runs_plot:
        continue
        
    # loop through the radii
    for radstr, radius in radii_iter.items():    

        # are we working with every run?
        if radstr not in radii_plot:
            continue
        
        # celln
        celln += 1

        # configure plots
        close_local = ppm.close_plot(celln,ifig,ptrack)
        if close_local[0]:
            plt.close(fig); ifig += 1; fig = plt.figure(ifig,figsize=(mollSize*mollRatio,mollSize),dpi=300)
            ppm.add_plot(celln,ifig,ptrack)
        else:
            ifig += 1; fig = plt.figure(ifig,figsize=(mollSize*mollRatio,mollSize),dpi=300)
            ppm.add_plot(celln,ifig,ptrack)

        ax = plt.axes([0.01, 0.01, 0.98, 0.88], projection='mollweide')
     
        # even bounds
        rho_plot = rhoFluct_r[runstr][radstr]
        
        if radstr == 'high':
            rho_bounds = np.mean(rho_plot) + 6*np.std(rho_plot)
        else:
            rho_bounds = np.max(np.abs(rho_plot))
        
        # plot
        ax.scatter(phi_r[runstr][radstr], theta_r[runstr][radstr], s=(72./fig.dpi)**2, 
                       marker=',',c=rho_plot,cmap='BrBG',rasterized=True,vmin=-rho_bounds,
                   vmax=rho_bounds)
                       
        ax.set_xticklabels([])
        ax.set_yticklabels([])
        ax.grid(color='k', linewidth=gridlines)

        # create a colorbar for the axs1 tripcolor, take part of the axes
        cax = fig.add_axes([0.01, 0.03, 0.98, 0.03])
        cbar1 = fig.colorbar(ax.collections[0], cax=cax, orientation='horizontal',format='%.0e',
                             label=r'$\left( \rho - \langle \rho \rangle \right) / \langle \rho \rangle$')

        # savefig
        if save:
            fig.savefig(savefig[runstr][radstr],bbox_inches = "tight",dpi=300)
        if not render:
            plt.close()
/usr/local/lib/python3.6/dist-packages/matplotlib/projections/geo.py:414: RuntimeWarning: invalid value encountered in arcsin
  theta = np.arcsin(y / np.sqrt(2))
/usr/local/lib/python3.6/dist-packages/matplotlib/projections/geo.py:414: RuntimeWarning: invalid value encountered in arcsin
  theta = np.arcsin(y / np.sqrt(2))

Utheta Mollweide

In [31]:
# if i want to look at one particular run and/or radii
runs_plot = ['n16']
radii_plot = ['low', 'high']
gridlines = 0.1

# plot stuff
savefig = collections.OrderedDict((run, 
                                collections.OrderedDict(zip(radii_iter.keys(), 
                                                            [1]*len(radii_iter.keys())))) for run in runs)

savefig['n15']['low'] = 'N15-vt-mollweide-low.pdf'
savefig['n15']['high'] = 'N15-vt-mollweide-high.pdf'
savefig['n16']['low'] = 'N16-vt-mollweide-low.pdf'
savefig['n16']['high'] = 'N16-vt-mollweide-high.pdf'
savefig['n17']['low'] = 'N17-vt-mollweide-low.pdf'
savefig['n17']['high'] = 'N17-vt-mollweide-high.pdf'
render = True
save = True
In [32]:
celln = 8

# loop through every run
for runstr, myrun in run.items():

    # are we working with every run?
    if runstr not in runs_plot:
        continue
        
    # radii counter 
    i = 0
    
    # loop through the radii
    for radstr, radius in radii_iter.items():    

        # are we working with every run?
        if radstr not in radii_plot:
            continue
        
        # celln
        celln += 1

        # configure plots
        close_local = ppm.close_plot(celln,ifig,ptrack)
        if close_local[0]:
            plt.close(fig); ifig += 1; fig = plt.figure(ifig,figsize=(mollSize*mollRatio,mollSize),dpi=300)
            ppm.add_plot(celln,ifig,ptrack)
        else:
            ifig += 1; fig = plt.figure(ifig,figsize=(mollSize*mollRatio,mollSize),dpi=300)
            ppm.add_plot(celln,ifig,ptrack)

        ax = plt.axes([0.01, 0.01, 0.98, 0.88], projection='mollweide')
        
        # convert to km/s
        utheta_plot = utheta_r[runstr][radstr] * 1e3
        
        # keep same colorbar limits
        if i == 0:
            uthmin = np.min(utheta_plot)
            uthmax = np.max(utheta_plot)
        
        # plot
        ax.scatter(phi_r[runstr][radstr], theta_r[runstr][radstr], s=(72./fig.dpi)**2, 
                       marker=',',c=utheta_plot,cmap='YlGnBu_r',vmin=uthmin,vmax=uthmax,rasterized=True)
                       
        ax.set_xticklabels([])
        ax.set_yticklabels([])
        ax.grid(color='k', linewidth=gridlines)

        # create a colorbar for the axs1 tripcolor, take part of the axes
        cax = fig.add_axes([0.01, 0.03, 0.98, 0.03])
        cbar1 = fig.colorbar(ax.collections[0], cax=cax, orientation='horizontal',format='%.1f',
                             label=r'$|v_{\perp}|$ / km s$^{-1}$')

        # savefig
        if save:
            fig.savefig(savefig[runstr][radstr],bbox_inches = "tight",dpi=300)
        if not render:
            plt.close()
            
        i += 1
/usr/local/lib/python3.6/dist-packages/matplotlib/projections/geo.py:414: RuntimeWarning: invalid value encountered in arcsin
  theta = np.arcsin(y / np.sqrt(2))
/usr/local/lib/python3.6/dist-packages/matplotlib/projections/geo.py:414: RuntimeWarning: invalid value encountered in arcsin
  theta = np.arcsin(y / np.sqrt(2))

Utheta Boundary

In [33]:
# if i want to look at one particular run and/or radii
runs_plot = ['n16']
gridlines = 0.1

def ubound(items):
    return utr_bound[items[0]].mean() + (utr_bound[items[0]].mean() - items[1])

# plot stuff
savefig = collections.OrderedDict(zip(run.keys(), ['N15-vt-boundary.pdf', 'N16-vt-boundary.pdf',
                                                  'N17-vt-boundary.pdf']))
render = True
save = True

# find the boundaries that I want
utheta_lbound = collections.OrderedDict(zip(run.keys(), [23, 23, 23]))
utheta_ubound = collections.OrderedDict(zip(run.keys(), map(ubound, utheta_lbound.items())))
In [34]:
celln = 12

# loop through every run
for runstr, myrun in run.items():
    
    # are we working with every run?
    if runstr not in runs_plot:
        continue

    # celln
    celln += 1

    # configure plots
    close_local = ppm.close_plot(celln,ifig,ptrack)
    if close_local[0]:
        plt.close(fig); ifig += 1; fig = plt.figure(ifig,figsize=(mollSize*mollRatio,mollSize),dpi=300)
        ppm.add_plot(celln,ifig,ptrack)
    else:
        ifig += 1; fig = plt.figure(ifig,figsize=(mollSize*mollRatio,mollSize),dpi=300)
        ppm.add_plot(celln,ifig,ptrack)

    ax = plt.axes([0.01, 0.01, 0.98, 0.88], projection='mollweide')
    
    # remove points that are part of the upper or lower boundaries of my search for the boundary
    remove = np.logical_not(((utr_bound[runstr] <= rbot_ut) & (utr_bound[runstr] >= rtop_ut)))

    # 3 sigma bounds
    utr_bound_mean = utr_bound[runstr][remove].mean()
    uval = utr_bound_mean + 3 * np.std(utr_bound[runstr][remove])
    lval = utr_bound_mean - 3 * np.std(utr_bound[runstr][remove])
    
    # plot
    ax.scatter(phi_ut_boundary[runstr][remove], theta_ut_boundary[runstr][remove], s=(72./fig.dpi)**2, 
                       marker=',',c=utr_bound[runstr][remove],cmap='PuOr',rasterized=True,
                vmin=lval, vmax=uval)

    ax.set_xticklabels([])
    ax.set_yticklabels([])
    ax.grid(color='k', linewidth=gridlines)

    # create a colorbar for the axs1 tripcolor, take part of the axes
    cax = fig.add_axes([0.01, 0.03, 0.98, 0.03])
    cbar1 = fig.colorbar(ax.collections[0], cax=cax, orientation='horizontal',format='%.1f',
                         label=r'Convective Boundary / Mm')

    # savefig
    if save:
        fig.savefig(savefig[runstr],bbox_inches = "tight",dpi=300)
    if not render:
        plt.close()

Fractional Area and Mass Flux

In [35]:
# if i want to look at one particular run and/or radii
runs_plot = ['n16']

# plot stuff
savefig = collections.OrderedDict(zip(run.keys(), ['N15-fractional.pdf', 'N16-fractional.png',
                                                  'N17-fractional.pdf']))

render = False
save = False
In [36]:
celln = 89

# loop through every run
for runstr, myrun in run.items():
    
    # are we working with every run?
    if runstr not in runs_plot:
        continue

    # celln
    celln += 1

    # configure plots
    close_local = ppm.close_plot(celln,ifig,ptrack)
    if close_local[0]:
        plt.close(fig); ifig += 1; fig = plt.figure(ifig,figsize=(mollSize*mollRatio,mollSize),dpi=300)
        ppm.add_plot(celln,ifig,ptrack)
    else:
        ifig += 1; fig = plt.figure(ifig,figsize=(mollSize*mollRatio,mollSize),dpi=300)
        ppm.add_plot(celln,ifig,ptrack)

    ax = fig.add_subplot(111)

    # plot fractional Area
    ax.plot(radii_fluxFA[runstr], FA_up[runstr], color=cb(0)[2], ls='-', label='Fractional Area')
    ax.plot(radii_fluxFA[runstr], 1 - FA_up[runstr], color=cb(0)[2], ls='--')
    
    # plot fractional mass flux
    ax.plot(radii_fluxFA[runstr], flux_up[runstr], color=cb(1)[2], ls='-', label='Fractional Mass Flux')
    ax.plot(radii_fluxFA[runstr], 1 - flux_up[runstr], color=cb(1)[2], ls='--')
    
    # set xlim, ylim
    ax.set_xlim([radii_fluxFA[runstr].min(), radii_fluxFA[runstr].max()])
    ax.set_xlabel('r / Mm')
#     ax.set_ylabel('Fractional')
    ax.legend(frameon=False)
    
    # savefig
    if save:
        fig.savefig(savefig[runstr],bbox_inches = "tight",dpi=300)
    if not render:
        plt.close()

Ur Spectrum Plots

What is the radial velocity spectrum for each run? Compare

In [37]:
# dictionary that are run dependent
radii = collections.OrderedDict((run, []) for run in runs)
power_ur_l_r_lmax = collections.OrderedDict((run, []) for run in runs)
radii_l = collections.OrderedDict((run, []) for run in runs)
power_ur_l_r = collections.OrderedDict((run, []) for run in runs)
l_r = collections.OrderedDict((run, []) for run in runs)
offset = collections.OrderedDict((run, []) for run in runs)

# constants
lmax = 50
min_r = m2r(run['n16'].mbot, run['n16'], run['n16'].dump0)
max_r = m2r(run['n16'].mtop, run['n16'], run['n16'].dump0)
radii_low = 14.5
radii_high = 23
myl_radii = [8.5, radii_low, radii_high]
offset_r = [5e-2, 5e-1, 1e3]

# loop through the different runs
for runstr, myrun in run.items():
    deex = myrun.rprof.get('deex',0)
    radii[runstr] = np.linspace(min_r,max_r, int((max_r - min_r)/deex))
    power_ur_l_r_lmax[runstr] = np.zeros((lmax, len(radii[runstr])))
    radii_l[runstr] = myl_radii.copy()
In [38]:
# loop through the different runs
for runstr, myrun in run.items():
    
    # grab the radial velocity on the grid
    ur = myrun.moms.get_spherical_components('ux','uy','uz',fname=myrun.dump0)[0]
    
    # for the spectrogram
    # for each radii, get power spectrum
    for counter, radius in enumerate(radii[runstr]):
        
        # what is lmax at this radius?
        lmax_r, N, npoints = myrun.moms.sphericalHarmonics_lmax(radius)
        
        # Calculate spherical harmonics up to lmax
        ur_interp = myrun.moms.sphericalHarmonics_format(ur, radius, lmax=lmax)
        
        # get coefficients and power, drop l=0!
        coeffs = pyshtools.expand.SHExpandDH(ur_interp, sampling=2)
        power_ur_l_r_lmax[runstr][:, counter] = pyshtools.spectralanalysis.spectrum(coeffs,
                                                                                     unit='per_l')[1:]

        # set to zero if lmax_r < lmax
        if lmax_r < lmax:
            power_ur_l_r_lmax[runstr][int((lmax_r-1)):, counter] = 0
            
    # for the particular l radii
    for counter, radius in enumerate(radii_l[runstr]):
        
        # what is lmax at this radius?
        lmax_r, N, npoints = myrun.moms.sphericalHarmonics_lmax(radius)
        
        # setup the correct power array
        power_array = np.zeros(lmax_r)
        
        # Calculate spherical harmonics up to lmax_r
        ur_interp = myrun.moms.sphericalHarmonics_format(ur, radius, lmax=lmax_r)
        
        # get coefficients and power, drop l=0!
        coeffs = pyshtools.expand.SHExpandDH(ur_interp, sampling=2)
        power_array = pyshtools.spectralanalysis.spectrum(coeffs, unit='per_l')[1:]
        power_ur_l_r[runstr].append(power_array)
        
        # create the l array
        l_r[runstr].append(np.arange(1,lmax_r+1))
        
        # arbitrary offset
        offset[runstr].append(offset_r[counter])

Ur Spectrogram Fig 5 & 6

Plot dependent dictionaries

In [39]:
# if i want to look at one particular run
runs_plot = ['n15', 'n16']

# plot stuff
savefig = collections.OrderedDict(zip(run.keys(), ['N15-vrspectra.pdf', 'N16-vrspectra.pdf',
                                                  'N17-vrspectra.pdf']))
render = True
save = True
In [40]:
# celln
celln = 14

# loop through the different runs
for runstr, myrun in run.items():
    
    if runstr not in runs_plot:
        continue

    celln += 1
    
    # configure plots
    close_local = ppm.close_plot(celln,ifig,ptrack)
    if close_local[0]:
        plt.close(fig); ifig += 1; fig = plt.figure(ifig,figsize=(horizSize*horizRatio,horizSize),dpi=300)
        ppm.add_plot(celln,ifig,ptrack)
    else:
        ifig += 1; fig = plt.figure(ifig,figsize=(horizSize*horizRatio,horizSize),dpi=300)
        ppm.add_plot(celln,ifig,ptrack)
        
    ax = fig.add_subplot(111)
    
    # I need to normalize by the logarithm
    scaled_power = power_ur_l_r_lmax[runstr] / np.max(power_ur_l_r_lmax[runstr],axis=0)[np.newaxis, :]
    vmax = 1.0
    vmin = 1e-2
    lognorm = colors.LogNorm(vmin=vmin, vmax=vmax, clip=True)

    # imshow
    ax.imshow(scaled_power,interpolation='none',origin='lower',norm=lognorm,aspect='auto',
             extent=(radii[runstr][0]-0.5*np.diff(radii[runstr])[0],
                     radii[runstr][-1] + 0.5*np.diff(radii[runstr])[0],0.5,0.5+lmax),
             rasterized=True)

    # create a colorbar for the axs1 tripcolor, use a new axes for it
    cbar1 = fig.colorbar(ax.images[0], ax=ax, orientation='vertical', 
                 label=r'S$_{cc}(\ell, r)$ / Max(S$_{cc}(r)$)',aspect=25)

#     ax.axes.set_position([0.01, 0.01, 0.98, 0.78])
    # plot details
    ax.set_xlabel('r / Mm')
    ax.set_ylabel(r'$\ell$')
    fig.tight_layout()
    
    # savefig
    if save:
        fig.savefig(savefig[runstr],bbox_inches = "tight",dpi=300)
    if not render:
        plt.close()

$\ell$ Plot

Plot dependent dictionaries

In [41]:
# plot stuff
savefig = 'lspectrum.pdf'
render = True
save = True
thickline = 1.5
thinline = 0.8
In [42]:
# celln
celln = 16
        
# configure plots
close_local = ppm.close_plot(celln,ifig,ptrack)
if close_local[0]:
    plt.close(fig); ifig += 1; fig = plt.figure(ifig,figsize=(stdRatio*stdSize,stdSize),dpi=300)
    ppm.add_plot(celln,ifig,ptrack)
else:
    ifig += 1; fig = plt.figure(ifig,figsize=(stdRatio*stdSize,stdSize),dpi=300)
    ppm.add_plot(celln,ifig,ptrack)

ax = fig.add_subplot(111)
cbc = 0

# loop over length of the lists
for i in range(len(power_ur_l_r['n15'])):
    ax.plot(l_r['n15'][i],power_ur_l_r['n15'][i] / power_ur_l_r['n15'][2][0] * offset['n15'][i],ls=cb(cbc)[0],color=cb(cbc)[2],
           linewidth=thinline)
    ax.plot(l_r['n16'][i],power_ur_l_r['n16'][i] / power_ur_l_r['n15'][2][0] * offset['n16'][i],ls=cb(cbc)[0],color=cb(cbc)[2],
           linewidth=thickline,label=r'r = {:0.1f}Mm'.format(myl_radii[i]))
    cbc += 1
   
# plot details
ax.set_xlabel(r'$\ell$')
ax.set_ylabel(r'Scaled Power')
ax.set_xlim([1,500])
ax.set_yscale('log')
ax.set_xscale('log')
ax.legend(frameon=False, loc='lower center')

# savefig
fig.tight_layout()
if save:
    fig.savefig(savefig,bbox_inches = "tight",dpi=300)
if not render:
    plt.close()

Entrainment Rate Fig 9

How is the entrainment rate evolving as a function of time for the two models during the advection post-processing?

In [43]:
# dictionary that are run dependent
var_value = collections.OrderedDict((run, []) for run in runs)
ut_scaleheight_r = collections.OrderedDict((run, []) for run in runs)
offset = collections.OrderedDict((run, []) for run in runs)
dumpStart = collections.OrderedDict((run, []) for run in runs)
dumpEnd = collections.OrderedDict((run, []) for run in runs)
burn_func = collections.OrderedDict((run, []) for run in runs)

# used for Paper derived quantities
mdot_fit = collections.OrderedDict((run, []) for run in runs)
mXcldir = collections.OrderedDict((run, []) for run in runs)
mXcldb = collections.OrderedDict((run, []) for run in runs)
entr_time = collections.OrderedDict((run, []) for run in runs)

# constants
r_min = 9.
r_max = 28.

# N15 linear regime
index = np.argmin(abs(run['n15'].times - 300*60))

# loop through the different runs
for runstr, myrun in run.items():
    
    # range of dumps to calculate entrainment
    dumpStart[runstr] = myrun.simDump0
    dumpEnd[runstr] = myrun.dumpMax
        
    dumps = list(range(dumpStart[runstr], dumpEnd[runstr]))
    var_value[runstr] = np.zeros(len(dumps))
    ut_scaleheight_r[runstr] = np.zeros(len(dumps))
    offset[runstr] = np.zeros(len(dumps))
    
    # need every dump to get var_value and the tangential velocity scale height
    # I will remove an offset from this boundary for integration limit
    if runstr == 'n17':
        offset[runstr][:] = 5.0
    else:
        offset[runstr][:] = 0.5
        
    for counter,dump in enumerate(dumps):
        var_value[runstr][counter] = float(m2r(myrun.mtop, myrun, dump)) - offset[runstr][counter]
    
    # rprof burn func
    burn_func[runstr] = myrun.rprof.compute_rhodot_C12pg

Plot dependent dictionaries

In [44]:
# if i want to look at one particular run and/or radii
runs_plot = ['n15','n16','n17']

# plot stuff
savefig = collections.OrderedDict(zip(run.keys(), ['N15-entrainment.pdf', 'N16-entrainment.pdf',
                                                  'N17-entrainment.pdf']))
fit_interval = collections.OrderedDict(zip(run.keys(), 
                [[run['n15'].simTime0, run['n15'].times[np.argmin(abs(run['n15'].times - 300*60))]], None,
                [run['n17'].simTime0, run['n17'].times[np.argmin(abs(run['n17'].times - 300*60))]]]))
render = True
save = True

Mixing Timescales

In [45]:
# since we are burning material, we need to include the burn function (rhodot) and the Tcorr
burn_args = {'T9corr_params':{'kind':1, 'params':{'a':0.46, 'b':0.77}}}
celln = 17

# loop through the different runs
for runstr, myrun in run.items():

    # configure plots
    close_local = ppm.close_plot(celln,ifig,ptrack)
    if close_local[0]:
        plt.close(fig); ifig += 1; fig = plt.figure(ifig,figsize=(stdRatio*stdSize,stdSize),dpi=300)
        ppm.add_plot(celln,ifig,ptrack)
    else:
        ifig += 1; fig = plt.figure(ifig,figsize=(stdRatio*stdSize,stdSize),dpi=300)
        ppm.add_plot(celln,ifig,ptrack)

    ax = fig.add_subplot(111)
    cbc = 0
    
    # run the method
    dumps = list(range(dumpStart[runstr], dumpEnd[runstr]))
    

    time, mir, mb = myrun.rprof.entr_rate(dumps, r_min, r_max, var='R', criterion='value',
                                var_value=var_value[runstr], burn_func=burn_func[runstr], 
                                burn_args=burn_args, show_plots=False, return_time_series=True, 
                                fit_interval=fit_interval[runstr])

    
    # save these in dictionary for later analysis
    mXcldir[runstr] = mir
    mXcldb[runstr] = mb
    entr_time[runstr]  = time
    
    # for later analysis, save n16 entrainment rate
    if runstr == 'n16':
        n16_mentrained = (mir + mb)
    
    # compute the entrained material
    mentr = mir + mb
    
    # compute the linear fit
    if fit_interval[runstr] is not None:
        fit_idx0 = np.argmin(np.abs(time - fit_interval[runstr][0]))
        fit_idx1 = np.argmin(np.abs(time - fit_interval[runstr][1])) + 1
        fit_range = range(fit_idx0, fit_idx1)
    else:
        fit_range = range(0,len(time))
        
    mtot_fc = np.polyfit(time[fit_range], mentr[fit_range], 1)
    mtot_fit = mtot_fc[0]*time[fit_range] + mtot_fc[1]
    mdot = mtot_fc[0]
        
    # save the slope for later analysis
    mdot_fit[runstr] = mdot
    
    # linear fit
    mdot_str = '{:e}'.format(mdot)
    parts = mdot_str.split('e')
    mantissa = float(parts[0])
    exponent = int(parts[1])
    lbl = (r'$\dot{{\mathrm{{M}}}}_\mathrm{{e}} = {:.2f} '
           r'\times 10^{{{:d}}}$ M$_\odot$ s$^{{-1}}$').\
           format(mantissa, exponent)

    ax.plot(time[fit_range]/60., mtot_fc[0]*time[fit_range] + mtot_fc[1], color=cb(4)[2], 
            ls='-', label=lbl, zorder=100)
    if len(fit_range) != len(time):
        new_range = range(len(fit_range),len(time))
        ax.plot(time[new_range]/60., mtot_fc[0]*time[new_range] + mtot_fc[1], color=cb(4)[2], 
            ls=':', zorder=100)

    ax.plot(time/60., mir, ':', color=cb(3)[2], label='present')
    ax.plot(time/60., mb, '--', color=cb(6)[2], label='burnt')
    ax.plot(time/60., mentr, color=cb(5)[2], label='total')

    # plot details
    ax.set_xlabel('t / min')
    ax.set_ylabel(r'M$_{\mathrm{e}}$ / M$_{\odot}$')
    myylim = [0, ax.get_ylim()[-1]]
    ax.set_ylim(myylim)
    ax.set_xlim([0,ax.get_xlim()[-1]])
    if runstr == 'n17':
        ax.vlines(myrun.times[1089]/60., *myylim, color='k', linestyle='-', linewidth=0.8)
    ax.ticklabel_format(style='sci', axis='y', scilimits=(0,0))
    ax.legend(loc='upper left',frameon=False)
    
    # savefig
    if save:
        fig.savefig(savefig[runstr],bbox_inches = "tight",dpi=300)
    if not render:
        plt.close(fig)
Computing boundary radius...
Done.
Integrating entrained mass...
3455/3455 cycles processed.
Done.
Computing boundary radius...
Done.
Integrating entrained mass...
1517/1517 cycles processed.
Done.
Computing boundary radius...
Done.
Integrating entrained mass...
1272/1272 cycles processed.
Done.

How does the radial velocity timescale compare to the horizontal timescale

In [46]:
# dictionary that are run dependent
dt_radial = collections.OrderedDict((run, []) for run in runs)
dt_horizontal = collections.OrderedDict((run, []) for run in runs)
r_shell = collections.OrderedDict((run, []) for run in runs)
In [47]:
# loop through the different runs
for runstr, myrun in run.items():

    # hydro is my hcore object
    hydro = myrun

    # get the "r_onShell" quantity and other vars
    deex = hydro.rprof.get('deex', 0)
    rconv_bot = m2r(hydro.mbot, hydro, hydro.dump0)
    rconv_top = m2r(hydro.mtop, hydro, hydro.dump0)
    r_onShell = np.linspace(rconv_bot, rconv_top, int((rconv_top - rconv_bot)/deex))
    power_func = [gamma_weightedPower]
    power_func_kwargs = [{}]
    my_lmax = 500

    gamma, dtr, dth = shellDerive(myrun.dump0, r_onShell)
    
    # set to the dictionaries
    dt_radial[runstr] = dtr
    dt_horizontal[runstr] = dth
    r_shell[runstr] = r_onShell

Plot dependent dictionaries

In [48]:
# if i want to look at one particular run and/or radii
runs_plot = ['n16']

# plot stuff
savefig = collections.OrderedDict(zip(run.keys(), ['N15-timescales.pdf', 'N16-timescales.pdf',
                                                  'N17-timescales.pdf']))
render = True
save = True

Fig 12

In [49]:
celln = 23

# loop through every run
for runstr, myrun in run.items():
    
    # are we working with every run?
    if runstr not in runs_plot:
        continue

    # celln
    celln += 1

    # configure plots
    close_local = ppm.close_plot(celln,ifig,ptrack)
    if close_local[0]:
        plt.close(fig); ifig += 1; fig = plt.figure(ifig,figsize=(stdRatio*stdSize,stdSize),dpi=300)
        ppm.add_plot(celln,ifig,ptrack)
    else:
        ifig += 1; fig = plt.figure(ifig,figsize=(stdRatio*stdSize,stdSize),dpi=300)
        ppm.add_plot(celln,ifig,ptrack)
    
    ax = fig.add_subplot(111)
    cbc = 0
    
    # add the dmhdt
    ax.plot(r_shell[runstr][1:],dt_radial[runstr].flatten(),ls=cb(cbc)[0],color=cb(cbc+8)[2],
            label=r'$\delta t_{\mathrm{r}}$')
    cbc += 1
    
    ax.plot(r_shell[runstr][1:],dt_horizontal[runstr].flatten(),ls=cb(0)[0],color=cb(cbc+14)[2],
            label=r'$\delta t_{\mathrm{h}}$')
    
    # plot details
    ax.set_xlabel('r / Mm')
    ax.set_ylabel(r'Timescale / s')
    ax.set_xlim([r_shell[runstr][0], r_shell[runstr][-1]])
    ax.set_yscale('log')
    ax.legend(frameon=False, loc='center')
 
    # savefig
    fig.tight_layout()
    if save:
        fig.savefig(savefig[runstr],bbox_inches = "tight",dpi=300)
    if not render:
        plt.close()

Numerical Values in Paper

This section will print out all of the relevant quantities that the paper cites. Some are calculated throughout the notebook while others are directly calculated here. Make sure you run the whole notebook

Heating region in the convection zone

In [50]:
# heating region constants
radbase = 7.282
dlayerbot = 2.0

rminbase = radbase + .5 * dlayerbot
rmaxbase = radbase + 1.5 * dlayerbot

print('The top heating boundary is at {:.2f}Mm and the bottom heating boundary is at {:.2f}Mm'
     .format(rminbase, rmaxbase))
The top heating boundary is at 8.28Mm and the bottom heating boundary is at 10.28Mm

Transient State

How long does the transient state last for approximately in each run?

In [51]:
# plot vrms as a function of time
celln = 30
render = True

# celln
celln += 1

# configure plots
close_local = ppm.close_plot(celln,ifig,ptrack)
if close_local[0]:
    plt.close(fig); ifig += 1; fig = plt.figure(ifig,figsize=(stdRatio*stdSize,stdSize),dpi=300)
    ppm.add_plot(celln,ifig,ptrack)
else:
    ifig += 1; fig = plt.figure(ifig,figsize=(stdRatio*stdSize,stdSize),dpi=300)
    ppm.add_plot(celln,ifig,ptrack)

ax = fig.add_subplot(111)
cbc = 0

# loop through every run
for runstr, myrun in run.items():
    
    Urmax = np.zeros(len(myrun.dumps))
    
    # loop through every dump, get max |Ur|
    for i,dump in enumerate(myrun.dumps):
        U = myrun.rprof.get('|U|',fname=dump)
        Ut = myrun.rprof.get('|Ut|',fname=dump)
        Urmax[i] = np.sqrt(U**2 - Ut**2).max()
        
    ax.plot(myrun.times / 60., Urmax, ls=cb(cbc)[0], color=cb(cbc)[2],label=runstr.capitalize())
    cbc += 1
    
ax.set_xlim([0,100])
ax.set_ylabel(r'Ur / Mm s$^{-1}$')
ax.set_xlabel('t / min')
ax.legend()

if not render:
    plt.close()
In [52]:
# roughly 30 minutes into the simulation all transients are gone, what dump?
transient = np.argmin(abs(run['n16'].times - 60*30))
print('The dump that the transients end in ALL sims is {:d}'.format(transient))
The dump that the transients end in ALL sims is 65

Convective Properties

What is the convective turnover time as a function of time for each simulation?

In [53]:
# dictionaries that are run dependent
tau_conv = collections.OrderedDict((run, []) for run in runs)
ur_avg = collections.OrderedDict((run, []) for run in runs)
In [54]:
# compute the convective turnover time, based on SC boundary, for all times and all runs
for runstr, myrun in run.items():
    
    tau_conv[runstr] = np.zeros(len(myrun.dumps[myrun.simDump0:]))
    ur_avg[runstr] = np.zeros(len(myrun.dumps[myrun.simDump0:]))
    
    # for every dump that we post-process
    for i,dump in enumerate(myrun.dumps[myrun.simDump0:]):
        
        # schwarz
        rtop = m2r(myrun.mtop, myrun, dump)
        rbot = m2r(myrun.mbot, myrun, dump)
        
        # velocities and tau_conv
        r = myrun.rprof.get('R',fname=dump,resolution='l')
        ur = np.sqrt(myrun.rprof.get('|U|',fname=dump)**2 - myrun.rprof.get('|Ut|',fname=dump)**2)
        
        rtopi = np.argmin(abs(r - rtop))
        rboti = np.argmin(abs(r - rbot))
        
        ur_avg[runstr][i] = (ur[rtopi:rboti+1].mean())
        tau_conv[runstr][i] = ((rtop - rbot) / ur_avg[runstr][i])
In [55]:
# plot tau_conv as a function of time
celln = 31
render = True

# celln
celln += 1

# configure plots
close_local = ppm.close_plot(celln,ifig,ptrack)
if close_local[0]:
    plt.close(fig); ifig += 1; fig = plt.figure(ifig,figsize=(stdRatio*stdSize,stdSize),dpi=300)
    ppm.add_plot(celln,ifig,ptrack)
else:
    ifig += 1; fig = plt.figure(ifig,figsize=(stdRatio*stdSize,stdSize),dpi=300)
    ppm.add_plot(celln,ifig,ptrack)

ax = fig.add_subplot(111)
cbc = 0

# loop through every run
for runstr, myrun in run.items():
        
    ax.plot(myrun.times[myrun.simDump0:] / 60., tau_conv[runstr] / 60., ls=cb(cbc)[0], 
            color=cb(cbc)[2],label=runstr.capitalize())
    cbc += 1
    
ax.set_xlim([100,1700])
ax.set_ylabel(r'$\tau_{\mathrm{conv}}$ / min')
ax.set_xlabel('t / min')
ax.legend()

if not render:
    plt.close()
In [56]:
# loop through every run
for runstr, myrun in run.items():

    # the average convective turnover time during quasistatic period
    quasistatic = np.argmin(abs(myrun.times - 300))
    avg_conv = tau_conv[runstr][0:(quasistatic-myrun.simDump0)].mean()
    
    print('{:s} average convective turnover time the post-processing simulation is {:.2f} min'
         .format(runstr.capitalize(),avg_conv / 60.))
    
    # How long is 100 dumps?
    print('A single dump has an interval of roughly {:.2f} s'.format(np.diff(myrun.times).mean()))
    print('The 100 dump repeat at the start is roughly {:.2f} min or {:.1f} convective turnovers for N16'
         .format(100*np.diff(myrun.times).mean()/60., 100*np.diff(myrun.times).mean()/avg_conv))
    print('')
N15 average convective turnover time the post-processing simulation is 19.16 min
A single dump has an interval of roughly 27.60 s
The 100 dump repeat at the start is roughly 45.99 min or 2.4 convective turnovers for N16

N16 average convective turnover time the post-processing simulation is 18.02 min
A single dump has an interval of roughly 27.60 s
The 100 dump repeat at the start is roughly 45.99 min or 2.6 convective turnovers for N16

N17 average convective turnover time the post-processing simulation is 9.00 min
A single dump has an interval of roughly 27.60 s
The 100 dump repeat at the start is roughly 45.99 min or 5.1 convective turnovers for N16

How are the statistics of the upper boundary determined by v_perp at dump0/time0?

What are the convective boundaries for both determinations at dump0/time0?

In [57]:
# loop through every run
for runstr, myrun in run.items():
    
    # grab schwarz boundary
    rtop = m2r(myrun.mtop, myrun, myrun.dump0)
    print('{:s} schwarz boundary is {:0.2f}Mm, v_perp boundary is {:0.2f}Mm with 1sigma {:0.2f}Mm'
          .format(runstr.capitalize(), rtop, float(utr_bound[runstr].mean()), 
                  1*float(utr_bound[runstr].std())))
    
    # get the pressure scale height there
    Hp = myrun.rprof.compute_Hp(myrun.dump0)
    r = myrun.rprof.get('R',fname=myrun.dump0,resolution='l')
    Hp_bound = Hp[np.argmin(abs(float(utr_bound[runstr].mean()) - r))]
    print('The pressure scale height at boundary is {:0.2f}Mm'.format(Hp_bound))
    print('The grid resolution is {:0.2f}Mm'.format(myrun.moms.moms_gridresolution))    
    print('')
    
N15 schwarz boundary is 23.86Mm, v_perp boundary is 23.43Mm with 1sigma 0.48Mm
The pressure scale height at boundary is 1.02Mm
The grid resolution is 0.37Mm

N16 schwarz boundary is 23.84Mm, v_perp boundary is 23.56Mm with 1sigma 0.46Mm
The pressure scale height at boundary is 0.97Mm
The grid resolution is 0.18Mm

N17 schwarz boundary is 26.74Mm, v_perp boundary is 26.69Mm with 1sigma 1.19Mm
The pressure scale height at boundary is 1.20Mm
The grid resolution is 0.25Mm

Entrainment

What is the linear fit to the entrainment rates?

In [58]:
# loop through every run
for runstr, myrun in run.items():
    print('{:s} mdot linear fit, is {:.2e} Msolar/s'.format(runstr.capitalize(), mdot_fit[runstr]))
N15 mdot linear fit, is 1.06e-11 Msolar/s
N16 mdot linear fit, is 7.21e-12 Msolar/s
N17 mdot linear fit, is 1.08e-10 Msolar/s

How does the integrated entrainment of mass compare with the smallest cell in post-processing?

How many post-processing cells are there?

How many factors does the entrainment rate increase during a GOSH?

In [60]:
# loop through every run
for runstr, myrun in run.items():
    
    # read in shell file
    data, header = readSCFile(myrun.initbase, 'shell', myrun.dump0)
    min_mcell = np.min(np.abs(np.diff(data['m']))/2.)
    
    # total entrained material
    tot_entrained = (mXcldir[runstr] + mXcldb[runstr])[-1]
    
    print('The smallest mass cell is {:.2e} while the total entrained material is {:.2e}'
          .format(min_mcell * 1e27/Msun, tot_entrained))
    print('There are {:d} cells in the post-processing'.format(len(data['m']-1)))
    
    # calculate the entrainment rate
    myentr_rate =  ppm.cdiff(mXcldir[runstr] + mXcldb[runstr]) / ppm.cdiff(entr_time[runstr])
    print('The maximum GOSH entrainment rate is a factor of {:.1f} times larger than the linear fit'
         .format(np.max(myentr_rate)/mdot_fit[runstr]))
    print('')
The smallest mass cell is 4.01e-06 while the total entrained material is 1.17e-06
There are 172 cells in the post-processing
The maximum GOSH entrainment rate is a factor of 3.5 times larger than the linear fit

The smallest mass cell is 2.59e-06 while the total entrained material is 3.07e-07
There are 257 cells in the post-processing
The maximum GOSH entrainment rate is a factor of 1.8 times larger than the linear fit

The smallest mass cell is 2.61e-06 while the total entrained material is 8.02e-06
There are 257 cells in the post-processing
The maximum GOSH entrainment rate is a factor of 86.5 times larger than the linear fit

In [ ]: